#include <asm/pal.h>
#include <asm/vhpt.h>
#include <public/hvm/ioreq.h>
+#include <asm/tlbflush.h>
+#include <asm/regionreg.h>
#define CONFIG_DOMAIN0_CONTIGUOUS
unsigned long dom0_start = -1L;
/* FIXME: where should these declarations live? */
extern void domain_pend_keyboard_interrupt(int);
extern long platform_is_hp_ski(void);
-extern unsigned long allocate_metaphysical_rr(void);
-extern int allocate_rid_range(struct domain *, unsigned long);
extern void sync_split_caches(void);
-extern void init_all_rr(struct vcpu *);
extern void serial_input_init(void);
static void init_switch_stack(struct vcpu *v);
/* this belongs in include/asm, but there doesn't seem to be a suitable place */
void arch_domain_destroy(struct domain *d)
{
- printf("arch_domain_destroy: not implemented\n");
- //free_page((unsigned long)d->mm.perdomain_pt);
- free_xenheap_page(d->shared_info);
+ struct page *page;
+ struct list_head *ent, *next;
+
+ if (d->arch.mm != NULL)
+ {
+ if (d->arch.mm->pgd != NULL)
+ {
+ /* Free every page-table page recorded on pt_list. */
+ list_for_each_safe ( ent, next, &d->arch.mm->pt_list )
+ {
+ page = list_entry(ent, struct page, list);
+ list_del(ent);
+ free_xenheap_page(page_to_virt(page));
+ }
+ pgd_free(d->arch.mm->pgd);
+ }
+ xfree(d->arch.mm);
+ }
+ if (d->shared_info != NULL)
+ free_xenheap_page(d->shared_info);
+
+ deallocate_rid_range(d);
+
+ /* Is a full TLB flush really necessary here? */
+ flush_tlb_all();
+
+ /* Is a full VHPT flush really necessary here? */
+ vhpt_flush();
}
static void default_idle(void)
void free_vcpu_struct(struct vcpu *v)
{
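+ /* The per-vcpu privregs (mapped_regs_t) area is allocated separately from the vcpu/stack pages, so free it first. */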
+ if (v->arch.privregs != NULL)
+ free_xenheap_pages(v->arch.privregs, get_order(sizeof(mapped_regs_t)));
free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
}
if ((d->arch.mm = xmalloc(struct mm_struct)) == NULL)
goto fail_nomem;
memset(d->arch.mm, 0, sizeof(*d->arch.mm));
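+ /* pt_list tracks every page-table page allocated for this domain so that arch_domain_destroy() can free them. */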
+ INIT_LIST_HEAD(&d->arch.mm->pt_list);
if ((d->arch.mm->pgd = pgd_alloc(d->arch.mm)) == NULL)
goto fail_nomem;
return 0;
}
+static void relinquish_memory(struct domain *d, struct list_head *list)
+{
+ struct list_head *ent;
+ struct page *page;
+#ifndef __ia64__
+ unsigned long x, y;
+#endif
+
+ /* Use a recursive lock, as we may enter 'free_domheap_page'. */
+ spin_lock_recursive(&d->page_alloc_lock);
+ ent = list->next;
+ while ( ent != list )
+ {
+ page = list_entry(ent, struct page, list);
+ /* Grab a reference to the page so it won't disappear from under us. */
+ if ( unlikely(!get_page(page, d)) )
+ {
+ /* Couldn't get a reference -- someone is freeing this page. */
+ ent = ent->next;
+ continue;
+ }
+
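+ /* Drop the pin reference and the general allocation reference, if this page holds them. */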
+ if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
+ put_page_and_type(page);
+
+ if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+ put_page(page);
+
+#ifndef __ia64__
+ /*
+ * Forcibly invalidate base page tables at this point to break circular
+ * 'linear page table' references. This is okay because MMU structures
+ * are not shared across domains and this domain is now dead. Thus base
+ * tables are not in use so a non-zero count means circular reference.
+ */
+ y = page->u.inuse.type_info;
+ for ( ; ; )
+ {
+ x = y;
+ if ( likely((x & (PGT_type_mask|PGT_validated)) !=
+ (PGT_base_page_table|PGT_validated)) )
+ break;
+
+ y = cmpxchg(&page->u.inuse.type_info, x, x & ~PGT_validated);
+ if ( likely(y == x) )
+ {
+ free_page_type(page, PGT_base_page_table);
+ break;
+ }
+ }
+#endif
+
+ /* Follow the list chain and /then/ potentially free the page. */
+ ent = ent->next;
+ put_page(page);
+ }
+
+ spin_unlock_recursive(&d->page_alloc_lock);
+}
+
void domain_relinquish_resources(struct domain *d)
{
- /* FIXME */
- printf("domain_relinquish_resources: not implemented\n");
+ /* Relinquish every page of memory. */
+
+ /* xenpage_list is not used on ia64, so it must already be empty. */
+ BUG_ON(!list_empty(&d->xenpage_list));
+
+ relinquish_memory(d, &d->page_list);
}
// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
struct page * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
{
struct mm_struct *mm = d->arch.mm;
- struct page *p = (struct page *)0;
+ struct page *pt, *p = (struct page *)0;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
}
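+ /* Walk the page table for mpaddr, allocating any missing intermediate levels and recording each newly allocated page-table page on pt_list. */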
pgd = pgd_offset(mm,mpaddr);
if (pgd_none(*pgd))
+ {
pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
+ pt = maddr_to_page(pgd_val(*pgd));
+ list_add_tail(&pt->list, &d->arch.mm->pt_list);
+ }
pud = pud_offset(pgd, mpaddr);
if (pud_none(*pud))
+ {
pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+ pt = maddr_to_page(pud_val(*pud));
+ list_add_tail(&pt->list, &d->arch.mm->pt_list);
+ }
pmd = pmd_offset(pud, mpaddr);
if (pmd_none(*pmd))
+ {
pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
// pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
+ pt = maddr_to_page(pmd_val(*pmd));
+ list_add_tail(&pt->list, &d->arch.mm->pt_list);
+ }
pte = pte_offset_map(pmd, mpaddr);
if (pte_none(*pte)) {
void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
{
struct mm_struct *mm = d->arch.mm;
+ struct page *pt;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
}
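+ /* As in assign_new_domain_page(): allocate any missing page-table levels for mpaddr and record each new page-table page on pt_list. */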
pgd = pgd_offset(mm,mpaddr);
if (pgd_none(*pgd))
+ {
pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
+ pt = maddr_to_page(pgd_val(*pgd));
+ list_add_tail(&pt->list, &d->arch.mm->pt_list);
+ }
pud = pud_offset(pgd, mpaddr);
if (pud_none(*pud))
+ {
pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+ pt = maddr_to_page(pud_val(*pud));
+ list_add_tail(&pt->list, &d->arch.mm->pt_list);
+ }
pmd = pmd_offset(pud, mpaddr);
if (pmd_none(*pmd))
+ {
pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
// pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
+ pt = maddr_to_page(pmd_val(*pmd));
+ list_add_tail(&pt->list, &d->arch.mm->pt_list);
+ }
pte = pte_offset_map(pmd, mpaddr);
if (pte_none(*pte)) {